rcu_lock(current->domain) does not need to disable preemption.
author: Keir Fraser <keir@xen.org>
Thu, 18 Nov 2010 12:26:27 +0000 (12:26 +0000)
committer: Keir Fraser <keir@xen.org>
Thu, 18 Nov 2010 12:26:27 +0000 (12:26 +0000)
If the guest sleeps in hypervisor context, it should not be destroyed
until execution reaches a safe point (i.e., guest context). This is
not implemented yet. :-) But the next patch will rely on it, to allow
an HVM guest to execute hypercalls that indirectly invoke __hvm_copy()
within an rcu_lock_current_domain() region.

Signed-off-by: Keir Fraser <keir@xen.org>
xen/arch/x86/mm.c
xen/arch/x86/physdev.c
xen/common/domain.c
xen/include/xen/sched.h

index 3cda1afffdd0be4c7646cd18043f75f152738e48..ec4ac8cbeb4c7379ac4ca8ea53f3a8e0c2723261 100644 (file)
@@ -2759,7 +2759,7 @@ static struct domain *get_pg_owner(domid_t domid)
 
     if ( likely(domid == DOMID_SELF) )
     {
-        pg_owner = rcu_lock_domain(curr);
+        pg_owner = rcu_lock_current_domain();
         goto out;
     }
 
index 6a12f0013e2978af31ad40264451b9bb46fe80f8..7f8e71a28b98b1a442b007033ea87b76b8aec8d0 100644 (file)
@@ -37,10 +37,8 @@ static int physdev_map_pirq(struct physdev_map_pirq *map)
     if ( !map )
         return -EINVAL;
 
-    if ( map->domid == DOMID_SELF )
-        d = rcu_lock_domain(current->domain);
-    else
-        d = rcu_lock_domain_by_id(map->domid);
+    d = (map->domid == DOMID_SELF) ? rcu_lock_current_domain()
+        : rcu_lock_domain_by_id(map->domid);
 
     if ( d == NULL )
         return -ESRCH;
@@ -165,10 +163,8 @@ static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
     struct domain *d;
     int ret;
 
-    if ( unmap->domid == DOMID_SELF )
-        d = rcu_lock_domain(current->domain);
-    else
-        d = rcu_lock_domain_by_id(unmap->domid);
+    d = (unmap->domid == DOMID_SELF) ? rcu_lock_current_domain()
+        : rcu_lock_domain_by_id(unmap->domid);
 
     if ( d == NULL )
         return -ESRCH;
index 1a08636756c265399563d5de78ecc5fc0ea71029..803c640a7f3db803eefe55f64ebcd0e366bb7765 100644 (file)
@@ -398,7 +398,7 @@ struct domain *get_domain_by_id(domid_t dom)
 
 struct domain *rcu_lock_domain_by_id(domid_t dom)
 {
-    struct domain *d;
+    struct domain *d = NULL;
 
     rcu_read_lock(&domlist_read_lock);
 
@@ -407,12 +407,15 @@ struct domain *rcu_lock_domain_by_id(domid_t dom)
           d = rcu_dereference(d->next_in_hashbucket) )
     {
         if ( d->domain_id == dom )
-            return d;
+        {
+            rcu_lock_domain(d);
+            break;
+        }
     }
 
     rcu_read_unlock(&domlist_read_lock);
 
-    return NULL;
+    return d;
 }
 
 int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
index e817e3128b231db9564d9b3b36f8bcfa450562d8..2ac43fc4b2b3850fc2492489a6cc8ff6cd844b9d 100644 (file)
@@ -439,18 +439,20 @@ int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d);
 /* Finish a RCU critical region started by rcu_lock_domain_by_id(). */
 static inline void rcu_unlock_domain(struct domain *d)
 {
-    rcu_read_unlock(&domlist_read_lock);
+    if ( d != current->domain )
+        rcu_read_unlock(&domlist_read_lock);
 }
 
 static inline struct domain *rcu_lock_domain(struct domain *d)
 {
-    rcu_read_lock(d);
+    if ( d != current->domain )
+        rcu_read_lock(d);
     return d;
 }
 
 static inline struct domain *rcu_lock_current_domain(void)
 {
-    return rcu_lock_domain(current->domain);
+    return /*rcu_lock_domain*/(current->domain);
 }
 
 struct domain *get_domain_by_id(domid_t dom);